> Building Agents with LangChain

Budding
planted Jan 8, 2026 · tended Jan 8, 2026
#ai-agents #langchain #python #framework

Building Agents with LangChain

🌿 Budding note β€” practical LangChain agent development.

Setup

pip install langchain langchain-anthropic langchain-community
from langchain_anthropic import ChatAnthropic
from langchain.agents import AgentExecutor, create_react_agent
from langchain.tools import Tool
from langchain import hub

# Initialize LLM
# temperature=0 gives deterministic decoding so tool selection is reproducible.
llm = ChatAnthropic(
    model="claude-sonnet-4-5-20250929",
    temperature=0
)

Related: AI Agents Fundamentals and Agent Frameworks Comparison

Quick Start: ReAct Agent

from langchain_community.tools import DuckDuckGoSearchRun
from langchain.agents import create_react_agent, AgentExecutor
from langchain import hub

# Define tools
# DuckDuckGo search needs no API key; `func` receives the agent's tool-input
# string and must return a string observation.
search = DuckDuckGoSearchRun()

tools = [
    Tool(
        name="Search",
        func=search.run,
        # The description is what the LLM reads to decide when to call this tool.
        description="Search web for current information"
    )
]

# Get ReAct prompt
# Pulls the canonical ReAct prompt template from LangChain Hub (network call).
prompt = hub.pull("hwchase17/react")

# Create agent
agent = create_react_agent(llm, tools, prompt)
agent_executor = AgentExecutor(
    agent=agent,
    tools=tools,
    verbose=True,               # print each thought/action/observation step
    handle_parsing_errors=True  # feed malformed LLM output back as an observation instead of raising
)

# Run
result = agent_executor.invoke({
    "input": "What are the latest AI developments in 2026?"
})
print(result["output"])

Tool Definition

from langchain.tools import Tool, StructuredTool
from pydantic import BaseModel, Field

# Simple tool
def calculator(expression: str) -> str:
    """Safely evaluate an arithmetic expression and return the result as a string.

    Replaces the original ``eval``-based implementation: this tool is fed
    LLM-generated text, so ``eval`` was an arbitrary-code-execution hole.
    Only numeric literals, the binary operators + - * / // % **, and unary
    +/- are accepted.

    Args:
        expression: arithmetic expression, e.g. "2 * (3 + 4)".

    Returns:
        ``str(result)`` on success, or the original error string
        "Error: Invalid expression" on malformed or disallowed input.
    """
    import ast
    import operator

    # Whitelist: AST operator node type -> implementation.
    ops = {
        ast.Add: operator.add,
        ast.Sub: operator.sub,
        ast.Mult: operator.mul,
        ast.Div: operator.truediv,
        ast.FloorDiv: operator.floordiv,
        ast.Mod: operator.mod,
        ast.Pow: operator.pow,
        ast.USub: operator.neg,
        ast.UAdd: operator.pos,
    }

    def _eval(node):
        # Recursively evaluate only the whitelisted node shapes.
        if isinstance(node, ast.Expression):
            return _eval(node.body)
        if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
            return node.value
        if isinstance(node, ast.BinOp) and type(node.op) in ops:
            return ops[type(node.op)](_eval(node.left), _eval(node.right))
        if isinstance(node, ast.UnaryOp) and type(node.op) in ops:
            return ops[type(node.op)](_eval(node.operand))
        raise ValueError("disallowed expression")

    try:
        return str(_eval(ast.parse(expression, mode="eval")))
    except Exception:
        # Narrowed from the original bare `except:` (which also caught
        # KeyboardInterrupt/SystemExit); same error string as before.
        return "Error: Invalid expression"

# Wrap the function as a single-string-input LangChain tool.
calc_tool = Tool(
    name="Calculator",
    func=calculator,
    description="Perform mathematical calculations"
)

# Structured tool with validation
class WeatherInput(BaseModel):
    """Pydantic argument schema for the GetWeather tool (enables multi-arg tools)."""
    location: str = Field(description="City name")
    unit: str = Field(description="Temperature unit", default="celsius")

def get_weather(location: str, unit: str = "celsius") -> str:
    """Return a placeholder weather report for *location*.

    The first letter of *unit*, upper-cased, becomes the temperature
    suffix ("celsius" -> "C", "fahrenheit" -> "F").
    """
    suffix = unit[0].upper()
    return "Weather in {}: 20°{}".format(location, suffix)

# StructuredTool validates the call arguments against WeatherInput before
# invoking get_weather, so the LLM cannot pass malformed arguments.
weather_tool = StructuredTool.from_function(
    func=get_weather,
    name="GetWeather",
    description="Get current weather",
    args_schema=WeatherInput
)

Agent Types

1. Zero-Shot ReAct

Standard reasoning + action agent:

from langchain.agents import create_react_agent

# `prompt` is the ReAct template pulled from the hub earlier.
agent = create_react_agent(llm, tools, prompt)
executor = AgentExecutor(
    agent=agent,
    tools=tools,
    max_iterations=10,  # hard cap on reason/act cycles to prevent runaway loops
    verbose=True
)

2. Conversational Agent

With memory:

from langchain.memory import ConversationBufferMemory
from langchain.agents import create_react_agent

# Buffer memory keeps the full turn-by-turn history under the
# "chat_history" key that conversational prompts expect.
memory = ConversationBufferMemory(
    memory_key="chat_history",
    return_messages=True  # return Message objects rather than one flat string
)

agent = create_react_agent(llm, tools, prompt)
executor = AgentExecutor(
    agent=agent,
    tools=tools,
    memory=memory,  # history is injected into every subsequent prompt
    verbose=True
)

# Maintains context across calls
executor.invoke({"input": "What's 5 + 3?"})
executor.invoke({"input": "Multiply that by 2"})  # Remembers previous result

Related: Agent Memory Systems

3. OpenAI Functions Agent

Uses function calling:

from langchain.agents import create_openai_functions_agent

# NOTE(review): this agent relies on the OpenAI function-calling message
# format; for ChatAnthropic the documented provider-agnostic choice is
# create_tool_calling_agent — confirm before relying on "works with Claude".
agent = create_openai_functions_agent(llm, tools, prompt)
executor = AgentExecutor(agent=agent, tools=tools)

Memory Types

Buffer Memory

from langchain.memory import ConversationBufferMemory

# Stores the raw conversation verbatim — simple, but grows without bound.
memory = ConversationBufferMemory(
    memory_key="history",
    return_messages=True
)

Summary Memory

from langchain.memory import ConversationSummaryMemory

# Uses the LLM itself to compress older turns into a running summary,
# trading extra token cost per turn for a bounded prompt size.
memory = ConversationSummaryMemory(
    llm=llm,
    memory_key="history",
    return_messages=True
)
# Automatically summarizes old messages

Vector Store Memory

from langchain.memory import VectorStoreRetrieverMemory
from langchain_community.vectorstores import Qdrant

# NOTE(review): `embeddings` is not defined in this snippet — an embedding
# model must be constructed first; verify against the caller.
vectorstore = Qdrant.from_texts(
    texts=[],  # start empty; memories are added as the conversation runs
    embedding=embeddings,
    location=":memory:"  # in-process Qdrant, nothing persisted
)

# Retrieves only the k most relevant past snippets instead of full history.
memory = VectorStoreRetrieverMemory(
    retriever=vectorstore.as_retriever(search_kwargs={"k": 5})
)

Chains

Compose multiple steps:

from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate

# Simple chain
# NOTE(review): LLMChain / SimpleSequentialChain are the legacy chain API;
# newer LangChain versions favour LCEL (`prompt | llm`) — confirm against
# the installed version.
prompt = PromptTemplate(
    input_variables=["topic"],
    template="Write a short summary about {topic}"
)

chain = LLMChain(llm=llm, prompt=prompt)
result = chain.run(topic="AI agents")

# Sequential chains
from langchain.chains import SimpleSequentialChain

# NOTE(review): research_prompt and write_prompt are assumed to be
# PromptTemplates defined elsewhere — this snippet does not create them.
research_chain = LLMChain(llm=llm, prompt=research_prompt)
write_chain = LLMChain(llm=llm, prompt=write_prompt)

# Each chain's single output string feeds the next chain's single input.
pipeline = SimpleSequentialChain(
    chains=[research_chain, write_chain],
    verbose=True
)

result = pipeline.run("AI agents")

LangGraph for Complex Workflows

from langgraph.graph import StateGraph, END
from typing import TypedDict

# Define state
class AgentState(TypedDict):
    # Shared state dict passed between graph nodes; each node returns a new one.
    messages: list   # accumulated log of what each node produced
    next_step: str   # name of the intended next node (informational here)

# Define nodes
def research_node(state: AgentState):
    """Research step: record completion and hand off to the write node."""
    # Build a fresh message list rather than mutating the incoming state.
    history = list(state["messages"])
    history.append("research done")
    return {"messages": history, "next_step": "write"}

def write_node(state: AgentState):
    """Writing step: record completion and signal the end of the workflow."""
    # Build a fresh message list rather than mutating the incoming state.
    history = list(state["messages"])
    history.append("article written")
    return {"messages": history, "next_step": "end"}

# Build graph
# Nodes are plain functions over AgentState; the edges fix the execution
# order research -> write -> END (no conditional routing in this example).
workflow = StateGraph(AgentState)
workflow.add_node("research", research_node)
workflow.add_node("write", write_node)
workflow.set_entry_point("research")
workflow.add_edge("research", "write")
workflow.add_edge("write", END)

app = workflow.compile()

# Execute
result = app.invoke({
    "messages": [],
    "next_step": "research"
})

Related: Multi-Agent Systems

Custom Tools

API Tool

import requests

def api_call(endpoint: str, timeout: float = 10.0) -> str:
    """GET https://api.example.com/<endpoint> and return the JSON body as a string.

    Fixes two defects in the original: no timeout (a dead endpoint would
    hang the agent forever) and returning a dict despite the declared
    ``-> str`` (LangChain tool functions should hand the LLM text).

    Args:
        endpoint: path appended to the base URL.
        timeout: seconds before the request is aborted (new, defaulted —
            backward-compatible).

    Raises:
        requests.RequestException: on network failure, timeout, or 4xx/5xx.
    """
    import json  # local import keeps the snippet self-contained

    response = requests.get(f"https://api.example.com/{endpoint}", timeout=timeout)
    response.raise_for_status()  # fail loudly instead of parsing an error page
    return json.dumps(response.json())

# Single-string-input tool; the agent supplies the endpoint path.
api_tool = Tool(
    name="APICall",
    func=api_call,
    description="Call external API endpoint"
)

Database Tool

import sqlite3

class DatabaseTool:
    """Thin wrapper that runs ad-hoc SQL against a SQLite database.

    A fresh connection is opened per query and always closed — the original
    leaked the connection whenever ``cursor.execute`` raised.

    WARNING: executes arbitrary SQL supplied by the agent; point it at a
    read-only / least-privilege database in production.
    """

    def __init__(self, db_path: str):
        # The path is stored, not opened, so constructing the tool is cheap.
        self.db_path = db_path

    def query(self, sql: str) -> list:
        """Execute *sql* and return all rows as a list of tuples.

        Raises:
            sqlite3.Error: on invalid SQL or connection problems; the
                connection is closed either way.
        """
        conn = sqlite3.connect(self.db_path)
        try:
            cursor = conn.cursor()
            cursor.execute(sql)
            return cursor.fetchall()
        finally:
            conn.close()  # close even when execute() raises (fixes leak)

# NOTE(review): the agent can run arbitrary SQL through this tool —
# restrict to a read-only database in production.
db = DatabaseTool("./data.db")
db_tool = Tool(
    name="DatabaseQuery",
    func=db.query,
    description="Query database with SQL"
)

Related: Tool Use and Function Calling

Error Handling

from langchain.callbacks import StdOutCallbackHandler

class ErrorHandlingCallback(StdOutCallbackHandler):
    """Callback that intercepts agent errors for logging/retry handling.

    Extends the stdout handler, so normal step printing is preserved.
    """

    def on_agent_error(self, error, **kwargs):
        # Hook invoked by the executor when the agent raises mid-run.
        print(f"Agent error: {error}")
        # Log, retry, or handle

executor = AgentExecutor(
    agent=agent,
    tools=tools,
    callbacks=[ErrorHandlingCallback()],  # custom error hook defined above
    handle_parsing_errors=True,  # recover from malformed LLM output instead of raising
    max_iterations=10            # bound runaway reason/act loops
)

Production Patterns

Caching

from langchain.cache import InMemoryCache
from langchain.globals import set_llm_cache

# Process-local, exact-match cache: identical prompts return the cached
# completion without an API call (cache is lost on restart).
set_llm_cache(InMemoryCache())
# LLM responses cached automatically

Streaming

# NOTE(review): AgentExecutor.stream yields dict chunks (actions / steps /
# final output), not plain text — printing them raw is demonstration only.
for chunk in executor.stream({"input": "Long task..."}):
    print(chunk, end="", flush=True)

Connection Points

Prerequisites:

Related:

Advanced: